Finetuning a model with Transformers

Published: May 10, 2023

import torch
import evaluate
import numpy as np

from tqdm.auto import tqdm
from torch.utils.data import DataLoader
from datasets import load_dataset
from transformers import AdamW
from transformers import get_scheduler
from transformers import AutoTokenizer, DataCollatorWithPadding
from transformers import AutoModelForSequenceClassification
checkpoint = "bert-base-uncased"
raw_datasets = load_dataset('glue', 'mrpc')
Found cached dataset glue (/home/akhlak/.cache/huggingface/datasets/glue/mrpc/1.0.0/dacbe3125aa31d7f70367a07a8a9e72a5a0bfeb5fc42e75c9db75b96da6053ad)
100%|██████████| 3/3 [00:00<00:00, 907.92it/s]
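Before tokenizing, it is worth a quick look at what load_dataset returned. MRPC comes back as a DatasetDict with train, validation, and test splits (3,668 / 408 / 1,725 sentence pairs), each carrying sentence1, sentence2, label, and idx columns; the lines below are just one quick way to confirm that.

# Inspect the splits and one example pair
print(raw_datasets)
print(raw_datasets['train'][0])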
tokenizer = AutoTokenizer.from_pretrained(checkpoint)

def tokenize_function(examples):
    # Tokenize the sentence pairs; padding is deferred to the data collator
    return tokenizer(examples['sentence1'], examples['sentence2'],
                     truncation=True)
tokenized_datasets = raw_datasets.map(tokenize_function, batched=True)
data_collator = DataCollatorWithPadding(tokenizer=tokenizer)
Loading cached processed dataset at /home/akhlak/.cache/huggingface/datasets/glue/mrpc/1.0.0/dacbe3125aa31d7f70367a07a8a9e72a5a0bfeb5fc42e75c9db75b96da6053ad/cache-000c138c4db1edcb.arrow
Loading cached processed dataset at /home/akhlak/.cache/huggingface/datasets/glue/mrpc/1.0.0/dacbe3125aa31d7f70367a07a8a9e72a5a0bfeb5fc42e75c9db75b96da6053ad/cache-ae70e2242c4dfe9f.arrow
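DataCollatorWithPadding applies dynamic padding: each batch is padded only to the length of its longest member when the batch is assembled, rather than padding the whole dataset to one global length. A small sketch of what it does to a handful of examples (the exact lengths will vary):

# Grab a few tokenized examples, dropping columns the collator cannot pad
samples = tokenized_datasets['train'][:8]
samples = {k: v for k, v in samples.items() if k not in ["idx", "sentence1", "sentence2"]}
print([len(ids) for ids in samples['input_ids']])   # varying lengths
padded = data_collator(samples)
print({k: v.shape for k, v in padded.items()})      # all padded to the longest length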
# The model expects the class labels in a column named 'labels'
tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

# Remove the raw text columns the model cannot take as input
tokenized_datasets = tokenized_datasets.remove_columns(["sentence1", "sentence2", "idx"])

# Return PyTorch tensors instead of Python lists
tokenized_datasets.set_format("torch")
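A quick check that the post-processing left only the columns the model accepts:

print(tokenized_datasets['train'].column_names)
# should contain only 'labels', 'input_ids', 'token_type_ids', and 'attention_mask'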
train_dataloader = DataLoader(tokenized_datasets['train'],
                              shuffle=True, batch_size=8,
                              collate_fn=data_collator)
eval_dataloader = DataLoader(tokenized_datasets['validation'],
                             batch_size=8, collate_fn=data_collator)
# Grab one batch and check its shapes; padding is dynamic, so the
# sequence length will vary from batch to batch
for batch in train_dataloader:
    break
{k : v.shape for k, v in batch.items()}
{'labels': torch.Size([8]),
 'input_ids': torch.Size([8, 70]),
 'token_type_ids': torch.Size([8, 70]),
 'attention_mask': torch.Size([8, 70])}
model = AutoModelForSequenceClassification.from_pretrained(checkpoint, num_labels=2)
Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForSequenceClassification: ['cls.predictions.decoder.weight', 'cls.predictions.bias', 'cls.seq_relationship.bias', 'cls.predictions.transform.dense.bias', 'cls.seq_relationship.weight', 'cls.predictions.transform.LayerNorm.weight', 'cls.predictions.transform.LayerNorm.bias', 'cls.predictions.transform.dense.weight']
- This IS expected if you are initializing BertForSequenceClassification from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).
- This IS NOT expected if you are initializing BertForSequenceClassification from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).
Some weights of BertForSequenceClassification were not initialized from the model checkpoint at bert-base-uncased and are newly initialized: ['classifier.bias', 'classifier.weight']
You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
# Sanity check: run a single forward pass on the batch from above
outputs = model(**batch)
print(outputs.loss, outputs.logits.shape)
tensor(0.7214, grad_fn=<NllLossBackward0>) torch.Size([8, 2])
# Optimizer
optimizer = AdamW(model.parameters(), lr=5e-5)
/home/akhlak/micromamba/envs/pytorch/lib/python3.11/site-packages/transformers/optimization.py:391: FutureWarning: This implementation of AdamW is deprecated and will be removed in a future version. Use the PyTorch implementation torch.optim.AdamW instead, or set `no_deprecation_warning=True` to disable this warning
  warnings.warn(
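The FutureWarning comes from the AdamW implementation bundled with transformers. PyTorch ships its own AdamW, so a drop-in way to avoid the warning (not what this run used) would be:

from torch.optim import AdamW   # PyTorch's maintained implementation

optimizer = AdamW(model.parameters(), lr=5e-5)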
num_epochs = 3
# 3,668 MRPC training pairs / batch size 8 -> 459 batches per epoch -> 1,377 total steps
num_training_steps = num_epochs * len(train_dataloader)
lr_scheduler = get_scheduler("linear", optimizer=optimizer,
                             num_warmup_steps=0,
                             num_training_steps=num_training_steps)
print(num_training_steps)
1377
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
model.to(device)
device
device(type='cuda')
progress_bar = tqdm(range(num_training_steps))

model.train()
for epoch in range(num_epochs):
    for batch in train_dataloader:
        batch = { k : v.to(device) for k, v in batch.items()}
        outputs = model(**batch)
        loss = outputs.loss
        loss.backward()

        optimizer.step()
        lr_scheduler.step()
        optimizer.zero_grad()
        progress_bar.update(1)
100%|██████████| 1377/1377 [02:05<00:00, 11.88it/s]
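At this point the fine-tuned weights only live in memory. If you want to reuse them later, a minimal sketch for saving them (the directory name is just a placeholder) is:

model.save_pretrained("bert-finetuned-mrpc")      # config + weights
tokenizer.save_pretrained("bert-finetuned-mrpc")  # vocab + tokenizer config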
metric = evaluate.load("glue", "mrpc")
model.eval()

for batch in eval_dataloader:
    batch = {k : v.to(device) for k, v in batch.items()}
    with torch.no_grad():
        outputs = model(**batch)

    logits = outputs.logits
    predictions = torch.argmax(logits, dim=-1)
    metric.add_batch(predictions=predictions, references=batch['labels'])

metric.compute()
Downloading builder script: 100%|██████████| 5.75k/5.75k [00:00<00:00, 4.16MB/s]
{'accuracy': 0.8676470588235294, 'f1': 0.9072164948453608}
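Roughly 87% accuracy and 0.91 F1 on the MRPC validation set. To close the loop, here is a sketch of how the fine-tuned model could be queried on a new, made-up sentence pair; for MRPC, label 1 means the two sentences are paraphrases:

text1 = "The company posted strong quarterly results."
text2 = "Quarterly earnings for the company were strong."

inputs = tokenizer(text1, text2, return_tensors="pt").to(device)
with torch.no_grad():
    logits = model(**inputs).logits
print(logits.argmax(dim=-1).item())   # 1 -> paraphrase, 0 -> not a paraphrase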